Wrapping TensorFlow Neural Networks

To take part in this year's Software Cup design competition, I have learned a lot of new material over the past few months. Now that the second-round optimization of our entry has been submitted, I am starting to summarize and record what I learned during these four months.

This post is about building neural networks with TensorFlow. TensorFlow wraps up the mechanics of neural networks so that deep learning becomes simple and approachable: even someone who does not understand the underlying algorithms can easily assemble all kinds of network models. To make building networks even more convenient, I wrote my own wrapper on top of TensorFlow.

The wrapper code for a fully connected network:

import tensorflow as tf

# Timer is a small timing helper from the project (the CNN below calls it
# as network.Timer()); a hedged sketch of it follows the class.

class NN:
    def __init__(self, shape):
        '''
        shape: dimensions of the training data; shape[0] is the input
        feature dimension, shape[1] is the label dimension.
        '''
        with tf.name_scope("inputs"):
            self._xs = tf.placeholder(tf.float32, [None, shape[0]], name="features")
            self._ys = tf.placeholder(tf.float32, [None, shape[1]], name="labels")
            self._keep_prob = tf.placeholder(tf.float32, name="keep_prob")
        # outputs of every layer, starting from the input placeholder
        self._layers_out_lst = [self._xs]
        # loss tensor
        self._loss = None
        # prediction tensor
        self._prediction = None
        # the running session
        self._sess = None
        # training op
        self._train_step = None
        self._saver = None
        # progress timer, created in initProgressbar()
        self._timer = None
    
    def add_layer(self, in_size, out_size, activation_function=None, name=None):
        '''Append one fully connected layer.'''
        with tf.name_scope(name):
            with tf.name_scope("{}_weight".format(name)):
                # weight matrix
                weights = tf.Variable(tf.random_normal([in_size, out_size], dtype=tf.float32), name="weight")
            with tf.name_scope("{}_biases".format(name)):
                # bias vector
                biases = tf.Variable(tf.zeros([1, out_size], dtype=tf.float32) + 0.1)
            with tf.name_scope("{}_w_plus_b".format(name)):
                # linear transform
                wx_plus_b = tf.matmul(self._layers_out_lst[-1], weights) + biases
            with tf.name_scope("{}_output".format(name)):
                if activation_function is None:
                    outputs = wx_plus_b
                else:
                    outputs = activation_function(wx_plus_b)
                tf.summary.histogram("{}_output".format(name), outputs)
        # the prediction tensor tracks the most recent dense layer
        self._prediction = outputs
        self._layers_out_lst.append(outputs)
    
    def add_loss_layer(self, cost_function, name=None):
        '''cost_function follows the tf.losses signature, e.g.
        tf.losses.softmax_cross_entropy(onehot_labels=..., logits=...).'''
        with tf.name_scope("loss"):
            self._loss = cost_function(
                onehot_labels=self._ys, logits=self._layers_out_lst[-1])
            tf.summary.scalar("loss", self._loss)
    
    def add_optimizer(self, optimizer, lr=0.01):
        '''optimizer is a TensorFlow optimizer class; lr is the learning rate.'''
        with tf.name_scope("train_step"):
            self._train_step = optimizer(lr).minimize(self._loss)
    
    def add_dropout(self, name=None):
        with tf.name_scope(name):
            x = self._layers_out_lst[-1]
            x = tf.nn.dropout(x, keep_prob=self._keep_prob, name=name)
            self._layers_out_lst.append(x)

    def fit(self, inputs, labels, epochs=100, keep_prob=0.5, savepath=None, logdir='logs', step=10):
        '''
        inputs: training features
        labels: training labels
        epochs: number of training iterations
        step: interval (in iterations) between loss reports and checkpoints
        '''
        # if savepath is given, checkpoint the model during training
        if savepath is not None:
            self._saver = tf.train.Saver()
        # variable initializer
        init = tf.global_variables_initializer()
        # no session yet means no model was restored; open one
        if self._sess is None:
            self._sess = tf.Session()
        # merge all summaries for TensorBoard
        merge = tf.summary.merge_all()
        # writer for TensorBoard visualization
        writer = tf.summary.FileWriter(logdir, self._sess.graph)
        # run the initializer
        self._sess.run(init)
        self.initProgressbar()
        for i in range(1, epochs + 1):
            feed_dict = {self._xs: inputs, self._ys: labels, self._keep_prob: keep_prob}
            self._sess.run(self._train_step, feed_dict=feed_dict)
            if i % step == 0:
                g = self._sess.run(merge, feed_dict=feed_dict)
                writer.add_summary(g, global_step=i)
                cost = self._sess.run(self._loss, feed_dict=feed_dict)
                print("current loss: %s" % str(cost), end='')
                self.showProgressBar(i / epochs)
                if savepath:
                    self._saver.save(self._sess, savepath, global_step=i)

    def predict(self, inputs):
        prediction = tf.argmax(self._prediction, 1)
        return self._sess.run(prediction, feed_dict={self._xs: inputs, self._keep_prob: 1.0})

    def evaluate(self, inputs, labels):
        correct_prediction = tf.equal(tf.argmax(self._prediction, 1), tf.argmax(labels, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        result = self._sess.run(accuracy, feed_dict={self._xs: inputs, self._ys: labels, self._keep_prob: 1.0})
        return result

    def restore(self, savepath):
        self._saver = tf.train.Saver()
        self._sess = tf.Session()
        self._saver.restore(self._sess, savepath)

    def initProgressbar(self):
        self._timer = Timer()

    def showProgressBar(self, rate):
        # elapsed time followed by the completion percentage
        s = self._timer.format("%dh %dm %ds %dms")
        print(s, "%4.2f%%" % (rate * 100))

    def __del__(self):
        if self._sess is not None:
            self._sess.close()
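
The Timer used by initProgressbar (and later by the CNN, via network.Timer()) is a small helper from the project that is not shown in this post. A minimal stand-in consistent with how it is called, offered only as an assumption, could be:

import time

class Timer:
    '''Hypothetical stand-in for the project's timing helper.'''
    def __init__(self):
        self._start = time.time()

    def format(self, fmt):
        # split the elapsed time into hours, minutes, seconds, milliseconds
        elapsed = time.time() - self._start
        h, rem = divmod(int(elapsed), 3600)
        m, s = divmod(rem, 60)
        ms = int((elapsed - int(elapsed)) * 1000)
        return fmt % (h, m, s, ms)

    def log(self):
        # print the formatted elapsed time on its own line
        print(self.format("%dh %dm %ds %dms"))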

With the wrapper in place, building a concrete network looks like this:

def getNetWork(initshape=(205, 2), path=None):
    # create the network object
    nn = NN(initshape)
    # stack up the layers
    nn.add_layer(initshape[0], 128, tf.nn.relu, name="input_layer")
    nn.add_layer(128, 64, tf.nn.sigmoid, name="hidden_layer_1")
    nn.add_dropout(name="dropout_1")
    nn.add_layer(64, 32, tf.nn.tanh, name="hidden_layer_2")
    nn.add_dropout(name="dropout_2")
    nn.add_layer(32, 16, tf.nn.softplus, name="hidden_layer_3")
    nn.add_layer(16, 2, tf.nn.elu, name="hidden_layer_4")
    nn.add_dropout(name="dropout_3")
    nn.add_loss_layer(tf.losses.softmax_cross_entropy, name="cost_layer")
    nn.add_optimizer(tf.train.AdamOptimizer, lr=0.001)
    return nn
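
Training and evaluating with this wrapper then takes only a few lines. In the sketch below, load_data is a hypothetical placeholder for whatever produces an (n, 205) feature matrix with (n, 2) one-hot labels; it is not part of the original project:

# hypothetical usage; load_data() is a placeholder, not project code
train_x, train_y, test_x, test_y = load_data()

nn = getNetWork(initshape=(205, 2))
nn.fit(train_x, train_y, epochs=500, keep_prob=0.5,
       savepath="model/nn.ckpt", logdir="logs", step=10)
print("accuracy:", nn.evaluate(test_x, test_y))
print(nn.predict(test_x[:5]))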


Wrapping a convolutional neural network:

class CNN:
    def __init__(self, **args):
        '''
        Keyword arguments:
        x_shape: shape of the input features, e.g. [None, 102, 102, 2]
        y_shape: shape of the labels, e.g. [None, 2]
        savepath: checkpoint path; the model is saved there on deletion
        logdir: TensorBoard log directory, defaults to "log"
        '''
        x_shape = args.get("x_shape")
        y_shape = args.get("y_shape")
        self._savepath = args.get("savepath")
        self._logdir = args.get("logdir", "log")
        with tf.name_scope("inputs_placeholders"):
            self._xs = tf.placeholder(tf.float32, x_shape)
            self._ys = tf.placeholder(tf.float32, y_shape)
            self._keep_prob = tf.placeholder(tf.float32)
        self._layer_outs = [self._xs]
        self._loss = None
        self._train_step = None
        self._sess = None
        self._saver = None

    def __del__(self):
        # save a final checkpoint before closing the session
        if self._sess is not None:
            if self._saver is not None:
                self._saver.save(self._sess, self._savepath)
            self._sess.close()

    def fit(self, features, labels, keep_prob=0.8, train_rate=1, epochs=100, step=10):
        '''
        features: training features
        labels: training labels
        keep_prob: dropout keep probability
        train_rate: fraction of the data actually used for training
        epochs: number of training iterations
        step: interval between progress reports
        '''
        # number of samples to train on
        length = int(train_rate * len(features))
        # fitbatch calls its batch getter with an index, so accept
        # and ignore one argument here
        def b(_):
            return features[:length], labels[:length]
        self.fitbatch(b, keep_prob, epochs=epochs, step=step)

    def fitbatch(self, get_batch=None, keep_prob=0.8, epochs=100, step=10):
        if self._savepath:
            self._saver = tf.train.Saver()
        # variable initializer
        init = tf.global_variables_initializer()
        # open the session
        self._sess = tf.Session()
        # merge all summaries for TensorBoard
        merge = tf.summary.merge_all()
        # writer for TensorBoard visualization
        writer = tf.summary.FileWriter(self._logdir, self._sess.graph)
        # run the initializer
        self._sess.run(init)
        timer = network.Timer()  # the Timer helper sketched above
        # start training; this data set happens to be split into six
        # training batches, hence the hard-coded modulus
        for counter in range(1, epochs + 1):
            features, labels = get_batch(counter % 6)
            # build the feed dict
            feed_dict = {self._xs: features, self._ys: labels, self._keep_prob: keep_prob}
            if counter == 1:
                # on the first pass, print every layer's output shape as a sanity check
                for out in self._layer_outs:
                    tmp = self._sess.run(out, feed_dict=feed_dict)
                    print(tmp.shape)
            # one training step
            self._sess.run(self._train_step, feed_dict=feed_dict)
            # report progress periodically
            if counter % step == 0:
                cost = self._sess.run(self._loss, feed_dict=feed_dict)
                print(counter, '\tcost', cost, end='\t')
                timer.log()
                # record summaries for TensorBoard
                g = self._sess.run(merge, feed_dict=feed_dict)
                writer.add_summary(g, global_step=counter)

    def restore(self):
        self._saver = tf.train.Saver()
        self._sess = tf.Session()
        self._saver.restore(self._sess, self._savepath)

    def predict(self, features):
        prediction = tf.argmax(self._layer_outs[-1], 1)
        return self._sess.run(prediction, feed_dict={self._xs: features, self._keep_prob: 1.0})

    def evaluate(self, features, labels):
        correct_prediction = tf.equal(tf.argmax(self._layer_outs[-1], 1), tf.argmax(self._ys, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        feed_dict = {self._xs: features, self._ys: labels, self._keep_prob: 1.0}
        return self._sess.run(accuracy, feed_dict=feed_dict)
        
    def _get_w_b(self, shape, stddev=0.01, name=None):
        # create the weight and bias variables for one layer
        with tf.name_scope("{}_weights".format(name)):
            initial = tf.truncated_normal(shape=shape, stddev=stddev)
            w = tf.Variable(initial, name="{}_weights".format(name))
        with tf.name_scope("{}_bias".format(name)):
            b = tf.Variable(tf.constant(0.1, shape=[shape[-1]]), name="{}_bias".format(name))
        return w, b
    def add_layer(self, shape, stddev=0.01, strides=[1, 10, 10, 1], padding='SAME', activate_function=tf.nn.relu, name=None):
        '''shape: the convolution filter shape
        [height, width, in_channels, out_channels].'''
        if name is None:
            name = "hidden_layer_%d" % len(self._layer_outs)
        with tf.name_scope(name):
            # pass stddev through to the weight initializer
            w, b = self._get_w_b(shape, stddev=stddev, name=name)
            outputs = tf.nn.conv2d(self._layer_outs[-1], w, strides=strides, padding=padding, name=name) + b
            if activate_function:
                outputs = activate_function(outputs)
            self._layer_outs.append(outputs)
            tf.summary.histogram("{}_output".format(name), outputs)
    def add_max_pool(self, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME", name=None):
        '''
        ksize: pooling window size
        strides: stride of the window
        padding: padding scheme
        '''
        if name is None:
            name = "max_pool_layer_%d" % len(self._layer_outs)
        with tf.name_scope(name):
            outputs = tf.nn.max_pool(self._layer_outs[-1], ksize=ksize, strides=strides, padding=padding, name=name)
            self._layer_outs.append(outputs)
            tf.summary.histogram("{}_output".format(name), outputs)

    def add_dropout(self, name=None):
        if name is None:
            name = "dropout_%d" % len(self._layer_outs)
        with tf.name_scope(name):
            outputs = tf.nn.dropout(self._layer_outs[-1], self._keep_prob, name=name)
            self._layer_outs.append(outputs)

    def add_cost_layer(self, cost_function, name=None):
        '''cost_function takes (labels, logits) and returns a scalar loss.'''
        if name is None:
            name = "cost_layer"
        with tf.name_scope(name):
            self._loss = cost_function(self._ys, self._layer_outs[-1])
            tf.summary.scalar("loss", self._loss)

    def add_optimizer(self, optimizer, lr=0.01):
        self._train_step = optimizer(lr).minimize(self._loss)
    def add_full_layer(self, shape, activate_function=tf.nn.relu, first=False, name=None):
        '''
        shape[0]: number of inputs expected from the previous layer
        shape[1]: number of neurons in (outputs of) this layer
        first: if True, flatten the previous conv/pool output first
        '''
        if name is None:
            name = "full_layer_%d" % len(self._layer_outs)
        with tf.name_scope(name):
            w, b = self._get_w_b(shape, name=name)
            if first:
                x = tf.reshape(self._layer_outs[-1], [-1, shape[0]])
            else:
                x = self._layer_outs[-1]
            outputs = activate_function(tf.matmul(x, w) + b)
            self._layer_outs.append(outputs)
            tf.summary.histogram("{}_output".format(name), outputs)

And the code that builds a concrete convolutional network:

def main(isTrain=True):
    cnn = CNN(x_shape=[None, 102, 102, 2],
              y_shape=[None, 2],
              logdir=r"E:\iqaa\logs",
              savepath=r"E:\iqaa\model\model.ckpt")
    # 102x102x2 -> 102x102x4 (SAME padding, stride 1)
    cnn.add_layer([5, 5, 2, 4], strides=[1, 1, 1, 1])
    # 102x102x4 -> 34x34x4
    cnn.add_max_pool(ksize=[1, 3, 3, 1], strides=[1, 3, 3, 1])
    cnn.add_layer([5, 5, 4, 8], strides=[1, 1, 1, 1])
    cnn.add_dropout()
    cnn.add_layer([5, 5, 8, 16], strides=[1, 1, 1, 1])
    # 34x34x16 -> 12x12x16 (ceil(34/3) = 12 with SAME padding),
    # which is where the 12*12*16 below comes from
    cnn.add_max_pool([1, 3, 3, 1], [1, 3, 3, 1])
    cnn.add_full_layer([12 * 12 * 16, 64], first=True)
    cnn.add_dropout()
    cnn.add_full_layer([64, 2], first=False)
    cnn.add_cost_layer(lambda ys, y: tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=ys, logits=y)))
    cnn.add_optimizer(tf.train.AdamOptimizer, 0.001)
    if isTrain:
        # batches 0-5 are used for training, batch 6 as a held-out test set
        cnn.fitbatch(lambda i: DataUtil.getbatch(i), keep_prob=0.8, epochs=1000, step=20)
        features, labels = DataUtil.getbatch(6)
        print(cnn.evaluate(features, labels))
    else:
        cnn.restore()
    return cnn
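
DataUtil is the project's data-loading module and is not included in this post. The only contract fitbatch relies on is that getbatch(i) returns a (features, labels) pair shaped like the placeholders; a purely hypothetical stub illustrating that contract:

import numpy as np

class DataUtil:
    '''Hypothetical stand-in for the project's data loader.'''
    @staticmethod
    def getbatch(i):
        # each batch: features shaped [batch, 102, 102, 2] and one-hot
        # labels shaped [batch, 2]; random data here, for shapes only
        features = np.random.rand(32, 102, 102, 2).astype(np.float32)
        labels = np.eye(2, dtype=np.float32)[np.random.randint(0, 2, size=32)]
        return features, labels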

